LD := $(CROSS_COMPILE)ld
# These are goodness flags that apply to all source files.
-C_WARNINGS := -Wpointer-arith -Wredundant-decls
+C_WARNINGS := -Wredundant-decls
# _no_ common code can have packed data structures or we are in trouble.
C_WARNINGS += -Wpacked
/* We want a continuous logical cpu number space. */
cpu_set(0, cpu_present_map);
cpu_set(0, cpu_online_map);
+ cpu_set(0, cpu_possible_map);
/* Spin up all CPUs, even if there are more than NR_CPUS, because
* Open Firmware has them spinning on cache lines which will
} while (pong == ping);
of_printf("pong = 0x%x\n", pong);
- if (pong != ping)
+ if (pong != ping) {
cpu_set(logical, cpu_present_map);
+ cpu_set(logical, cpu_possible_map);
+ }
cpu = of_getpeer(cpu);
}
#include <public/domctl.h>
#include <public/sysctl.h>
+void arch_getdomaininfo_ctxt(struct vcpu *, vcpu_guest_context_t *);
/* Copy the architecture-specific state of vcpu @v into the guest
 * context @c used by the domctl/getdomaininfo interface.  Only the
 * user register file is copied so far; the remaining fields of
 * vcpu_guest_context_t are still unfilled (see XXX below). */
void arch_getdomaininfo_ctxt(struct vcpu *v, vcpu_guest_context_t *c)
{
memcpy(&c->user_regs, &v->arch.ctxt, sizeof(struct cpu_user_regs));
/* XXX fill in rest of vcpu_guest_context_t */
}
+long arch_do_domctl(struct xen_domctl *domctl,
+ XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
long arch_do_domctl(struct xen_domctl *domctl,
XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
}
}
break;
+ case XEN_DOMCTL_shadow_op:
+ {
+ struct domain *d;
+ ret = -ESRCH;
+ d = find_domain_by_id(domctl->domain);
+ if ( d != NULL )
+ {
+ ret = shadow_domctl(d, &domctl->u.shadow_op, u_domctl);
+ put_domain(d);
+ copy_to_guest(u_domctl, domctl, 1);
+ }
+ }
+ break;
default:
ret = -ENOSYS;
return ret;
}
+long arch_do_sysctl(struct xen_sysctl *sysctl,
+ XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl);
long arch_do_sysctl(struct xen_sysctl *sysctl,
XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
{
ret = -EFAULT;
}
break;
- case DOM0_SHADOW_CONTROL:
- {
- struct domain *d;
- ret = -ESRCH;
- d = find_domain_by_id(op->u.shadow_control.domain);
- if ( d != NULL )
- {
- ret = shadow_control_op(d, &op->u.shadow_control, u_dom0_op);
- put_domain(d);
- copy_to_guest(u_dom0_op, op, 1);
- }
- }
- break;
default:
- printk("%s: unsupported op: 0x%x\n", __func__, (op->cmd));
+ printk("%s: unsupported sysctl: 0x%x\n", __func__, (sysctl->cmd));
ret = -ENOSYS;
break;
}
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
cpumask_t cpu_online_map; /* missing ifdef in schedule.c */
cpumask_t cpu_present_map;
+cpumask_t cpu_possible_map;
/* XXX get this from ISA node in device tree */
ulong isa_io_base;
break;
init_parea(cpuid);
cpu_set(cpuid, cpu_online_map);
+ cpu_set(cpuid, cpu_possible_map);
}
return 0;
#include <xen/config.h>
#include <xen/types.h>
#include <xen/shadow.h>
-#include <public/dom0_ops.h>
static ulong htab_calc_sdr1(ulong htab_addr, ulong log_htab_size)
{
return rc;
}
-int shadow_control_op(struct domain *d,
- dom0_shadow_control_t *sc,
- XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op)
+int shadow_domctl(struct domain *d,
+ xen_domctl_shadow_op_t *sc,
+ XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
if ( unlikely(d == current->domain) )
{
switch ( sc->op )
{
- case DOM0_SHADOW_CONTROL_OP_OFF:
+ case XEN_DOMCTL_SHADOW_OP_OFF:
DPRINTK("Shadow is mandatory!\n");
return -EINVAL;
- case DOM0_SHADOW2_CONTROL_OP_GET_ALLOCATION:
+ case XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION:
sc->mb = shadow_get_allocation(d);
return 0;
- case DOM0_SHADOW2_CONTROL_OP_SET_ALLOCATION: {
+ case XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION: {
int rc;
int preempted = 0;
if (preempted)
/* Not finished. Set up to re-run the call. */
rc = hypercall_create_continuation(
- __HYPERVISOR_dom0_op, "h", u_dom0_op);
+ __HYPERVISOR_domctl, "h", u_domctl);
else
/* Finished. Return the new allocation */
sc->mb = shadow_get_allocation(d);
}
#define gnttab_mark_dirty(d, f) mark_dirty((d), (f))
-extern int shadow_control_op(struct domain *d,
- dom0_shadow_control_t *sc,
- XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op);
+extern int shadow_domctl(struct domain *d,
+ xen_domctl_shadow_op_t *sc,
+ XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
extern unsigned int shadow_teardown(struct domain *d);
extern unsigned int shadow_set_allocation(
struct domain *d, unsigned int megabytes, int *preempted);
-/* Return the size of the shadow2 pool, rounded up to the nearest MB */
+/* Return the size of the shadow pool, rounded up to the nearest MB */
static inline unsigned int shadow_get_allocation(struct domain *d)
{
return (1ULL << (d->arch.htab.order + PAGE_SHIFT)) >> 20;